TensorFlow: Optimizing Learning Rate


In [1]:
import math
import os
import pandas as pd
import numpy as np
from datetime import datetime

import tensorflow as tf
from tensorflow import data

print "TensorFlow : {}".format(tf.__version__)

SEED = 19831060


TensorFlow : 1.12.0

Download the Data


In [2]:
DATA_DIR='data'
# !mkdir $DATA_DIR
# !gsutil cp gs://cloud-samples-data/ml-engine/census/data/adult.data.csv $DATA_DIR
# !gsutil cp gs://cloud-samples-data/ml-engine/census/data/adult.test.csv $DATA_DIR
TRAIN_DATA_FILE = os.path.join(DATA_DIR, 'adult.data.csv')
EVAL_DATA_FILE = os.path.join(DATA_DIR, 'adult.test.csv')

In [3]:
TRAIN_DATA_SIZE = 32561
EVAL_DATA_SIZE = 16278

Dataset Metadata


In [4]:
HEADER = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
               'marital_status', 'occupation', 'relationship', 'race', 'gender',
               'capital_gain', 'capital_loss', 'hours_per_week',
               'native_country', 'income_bracket']

HEADER_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
                       [0], [0], [0], [''], ['']]

NUMERIC_FEATURE_NAMES = ['age', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week']
CATEGORICAL_FEATURE_NAMES = ['gender', 'race', 'education', 'marital_status', 'relationship', 
                             'workclass', 'occupation', 'native_country']

FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES
TARGET_NAME = 'income_bracket'
TARGET_LABELS = [' <=50K', ' >50K']
WEIGHT_COLUMN_NAME = 'fnlwgt'
NUM_CLASSES = len(TARGET_LABELS)

def get_categorical_features_vocabolary():
    data = pd.read_csv(TRAIN_DATA_FILE, names=HEADER)
    return {
        column: list(data[column].unique()) 
        for column in data.columns if column in CATEGORICAL_FEATURE_NAMES
    }

In [5]:
feature_vocabolary = get_categorical_features_vocabolary()
print(feature_vocabolary)


{'workclass': [' State-gov', ' Self-emp-not-inc', ' Private', ' Federal-gov', ' Local-gov', ' ?', ' Self-emp-inc', ' Without-pay', ' Never-worked'], 'relationship': [' Not-in-family', ' Husband', ' Wife', ' Own-child', ' Unmarried', ' Other-relative'], 'gender': [' Male', ' Female'], 'marital_status': [' Never-married', ' Married-civ-spouse', ' Divorced', ' Married-spouse-absent', ' Separated', ' Married-AF-spouse', ' Widowed'], 'race': [' White', ' Black', ' Asian-Pac-Islander', ' Amer-Indian-Eskimo', ' Other'], 'native_country': [' United-States', ' Cuba', ' Jamaica', ' India', ' ?', ' Mexico', ' South', ' Puerto-Rico', ' Honduras', ' England', ' Canada', ' Germany', ' Iran', ' Philippines', ' Italy', ' Poland', ' Columbia', ' Cambodia', ' Thailand', ' Ecuador', ' Laos', ' Taiwan', ' Haiti', ' Portugal', ' Dominican-Republic', ' El-Salvador', ' France', ' Guatemala', ' China', ' Japan', ' Yugoslavia', ' Peru', ' Outlying-US(Guam-USVI-etc)', ' Scotland', ' Trinadad&Tobago', ' Greece', ' Nicaragua', ' Vietnam', ' Hong', ' Ireland', ' Hungary', ' Holand-Netherlands'], 'education': [' Bachelors', ' HS-grad', ' 11th', ' Masters', ' 9th', ' Some-college', ' Assoc-acdm', ' Assoc-voc', ' 7th-8th', ' Doctorate', ' Prof-school', ' 5th-6th', ' 10th', ' 1st-4th', ' Preschool', ' 12th'], 'occupation': [' Adm-clerical', ' Exec-managerial', ' Handlers-cleaners', ' Prof-specialty', ' Other-service', ' Sales', ' Craft-repair', ' Transport-moving', ' Farming-fishing', ' Machine-op-inspct', ' Tech-support', ' ?', ' Protective-serv', ' Armed-Forces', ' Priv-house-serv']}

Building a TensorFlow Custom Estimator

  1. Creating feature columns
  2. Creating model_fn
  3. Create estimator using the model_fn
  4. Define data input_fn
  5. Define Train and evaluate experiment
  6. Run experiment with parameters

1. Create feature columns


In [6]:
def create_feature_columns():
    
    feature_columns = []
    
    for column in NUMERIC_FEATURE_NAMES:
        feature_column = tf.feature_column.numeric_column(column)
        feature_columns.append(feature_column)
        
    for column in CATEGORICAL_FEATURE_NAMES:
        vocabolary = feature_vocabolary[column]
        embed_size = round(math.sqrt(len(vocabolary)) * 1.5)
        feature_column = tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_vocabulary_list(column, vocabolary), 
            embed_size)
        feature_columns.append(feature_column)
        
    return feature_columns

2. Create model_fn

  1. Use feature columns to create input_layer
  2. Use tf.keras.layers to define the model architecutre and output
  3. Use binary_classification_head for create EstimatorSpec

In [7]:
from tensorflow.python.ops import math_ops

def find_learning_rate(params):
    
    training_step = tf.cast(tf.train.get_global_step(), tf.float32)
    factor = tf.cast(tf.multiply(1.e-5, training_step*training_step), tf.float32)
    learning_rate = tf.add(params.learning_rate, factor)
    return learning_rate
    

def update_learning_rate(params):
    
    training_step = tf.cast(tf.train.get_global_step(), tf.int32)
    base_cycle = tf.floordiv(training_step, params.cycle_length)
    current_cycle = tf.cast(tf.round(tf.sqrt(tf.cast(base_cycle, tf.float32))) + 1, tf.int32)
    current_cycle_length = tf.cast(tf.multiply(current_cycle, params.cycle_length), tf.int32)
    cycle_step = tf.mod(training_step, current_cycle_length)

    learning_rate = tf.cond(
        tf.equal(cycle_step, 0),
        lambda: params.learning_rate,
        lambda: tf.train.cosine_decay(
            learning_rate=params.learning_rate,
            global_step=cycle_step,
            decay_steps=current_cycle_length,
            alpha=0.0,
        )
    )
    
    tf.summary.scalar('base_cycle', base_cycle)
    tf.summary.scalar('current_cycle', current_cycle)
    tf.summary.scalar('current_cycle_length', current_cycle_length)
    tf.summary.scalar('cycle_step', cycle_step)
    tf.summary.scalar('learning_rate', learning_rate)

    return learning_rate

def model_fn(features, labels, mode, params):
    
    is_training = True if mode == tf.estimator.ModeKeys.TRAIN else False
    
    # model body
    def _inference(features, mode, params):
        
        feature_columns = create_feature_columns()
        input_layer = tf.feature_column.input_layer(features=features, feature_columns=feature_columns)
        dense_inputs = input_layer
        for i in range(len(params.hidden_units)):
            dense = tf.keras.layers.Dense(params.hidden_units[i], activation='relu')(dense_inputs)
            dense_dropout = tf.keras.layers.Dropout(params.dropout_prob)(dense, training=is_training)
            dense_inputs = dense_dropout
        fully_connected = dense_inputs  
        logits = tf.keras.layers.Dense(units=1, name='logits', activation=None)(fully_connected)
        return logits
    
    # model head
    head = tf.contrib.estimator.binary_classification_head(
        label_vocabulary=TARGET_LABELS,
        weight_column=WEIGHT_COLUMN_NAME
    )
    
    learning_rate = find_learning_rate(params) if params.lr_search else update_learning_rate(params)
    
    return head.create_estimator_spec(
        features=features,
        mode=mode,
        logits=_inference(features, mode, params),
        labels=labels,
        optimizer=tf.train.AdamOptimizer(learning_rate)
    )

3. Create estimator


In [8]:
def create_estimator(params, run_config):
    
    feature_columns = create_feature_columns()
    
    estimator = tf.estimator.Estimator(
        model_fn,
        params=params,
        config=run_config
    )
    
    return estimator

4. Data Input Function


In [9]:
def make_input_fn(file_pattern, batch_size, num_epochs, 
                  mode=tf.estimator.ModeKeys.EVAL):
    
    def _input_fn():
        dataset = tf.data.experimental.make_csv_dataset(
            file_pattern=file_pattern,
            batch_size=batch_size,
            column_names=HEADER,
            column_defaults=HEADER_DEFAULTS,
            label_name=TARGET_NAME,
            field_delim=',',
            use_quote_delim=True,
            header=False,
            num_epochs=num_epochs,
            shuffle=(mode==tf.estimator.ModeKeys.TRAIN)
        )
        
        iterator = dataset.make_one_shot_iterator()
        features, target = iterator.get_next()
        return features, target
    
    return _input_fn

5. Experiment Definition


In [10]:
def train_and_evaluate_experiment(params, run_config):
    
    # TrainSpec ####################################
    train_input_fn = make_input_fn(
        TRAIN_DATA_FILE,
        batch_size=params.batch_size,
        num_epochs=None,
        mode=tf.estimator.ModeKeys.TRAIN
    )
    
    train_spec = tf.estimator.TrainSpec(
        input_fn = train_input_fn,
        max_steps=params.traning_steps
    )
    ###############################################    
    
    # EvalSpec ####################################
    eval_input_fn = make_input_fn(
        EVAL_DATA_FILE,
        num_epochs=1,
        batch_size=params.batch_size,
    )

    eval_spec = tf.estimator.EvalSpec(
        name=datetime.utcnow().strftime("%H%M%S"),
        input_fn = eval_input_fn,
        steps=None,
        start_delay_secs=0,
        throttle_secs=params.eval_throttle_secs
    )
    ###############################################

    tf.logging.set_verbosity(tf.logging.INFO)
    
    if tf.gfile.Exists(run_config.model_dir):
        print("Removing previous artefacts...")
        tf.gfile.DeleteRecursively(run_config.model_dir)
            
    print ''
    estimator = create_estimator(params, run_config)
    print ''
    
    time_start = datetime.utcnow() 
    print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
    print(".......................................") 

#     tf.estimator.train_and_evaluate(
#         estimator=estimator,
#         train_spec=train_spec, 
#         eval_spec=eval_spec
#     )

    estimator.train(train_input_fn, steps=params.traning_steps)

    time_end = datetime.utcnow() 
    print(".......................................")
    print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
    print("")
    time_elapsed = time_end - time_start
    print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))

6. Run Experiment with Parameters


In [11]:
MODELS_LOCATION = 'models/census'
MODEL_NAME = 'dnn_classifier-01'
model_dir = os.path.join(MODELS_LOCATION, MODEL_NAME)

BATCH_SIZE = 64
NUM_EPOCHS = 10
steps_per_epoch = int(math.ceil((TRAIN_DATA_SIZE / BATCH_SIZE)))
training_steps = int(steps_per_epoch * NUM_EPOCHS)

print("Training data size: {}".format(TRAIN_DATA_SIZE))
print("Btach data size: {}".format(BATCH_SIZE))
print("Steps per epoch: {}".format(steps_per_epoch))
print("Traing epochs: {}".format(NUM_EPOCHS))
print("Training steps: {}".format(training_steps))

params  = tf.contrib.training.HParams(
    batch_size=BATCH_SIZE,
    traning_steps=training_steps,
    hidden_units=[64, 32],
    learning_rate=1.e-3,
    cycle_length=500,
    dropout_prob=0.1,
    eval_throttle_secs=0,
    lr_search=False
)

run_config = tf.estimator.RunConfig(
    tf_random_seed=SEED,
    save_checkpoints_steps=steps_per_epoch,
    log_step_count_steps=100,
    save_summary_steps=1,
    keep_checkpoint_max=3,
    model_dir=model_dir,
)


Training data size: 32561
Btach data size: 64
Steps per epoch: 508
Traing epochs: 10
Training steps: 5080

In [12]:
train_and_evaluate_experiment(params, run_config)


Removing previous artefacts...

INFO:tensorflow:Using config: {'_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true
graph_options {
  rewrite_options {
    meta_optimizer_iterations: ONE
  }
}
, '_keep_checkpoint_max': 3, '_task_type': 'worker', '_train_distribute': None, '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x11d9116d0>, '_model_dir': 'models/census/dnn_classifier-01', '_protocol': None, '_save_checkpoints_steps': 508, '_keep_checkpoint_every_n_hours': 10000, '_service': None, '_num_ps_replicas': 0, '_tf_random_seed': 19831060, '_save_summary_steps': 1, '_device_fn': None, '_experimental_distribute': None, '_num_worker_replicas': 1, '_task_id': 0, '_log_step_count_steps': 100, '_evaluation_master': '', '_eval_distribute': None, '_global_id_in_cluster': 0, '_master': ''}

Experiment started at 22:03:53
.......................................
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Saving checkpoints for 0 into models/census/dnn_classifier-01/model.ckpt.
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-12-dec9477e6e51> in <module>()
----> 1 train_and_evaluate_experiment(params, run_config)

<ipython-input-10-1753eb81587c> in train_and_evaluate_experiment(params, run_config)
     51 #     )
     52 
---> 53     estimator.train(train_input_fn, steps=params.traning_steps)
     54 
     55     time_end = datetime.utcnow()

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.pyc in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
    352 
    353       saving_listeners = _check_listeners_type(saving_listeners)
--> 354       loss = self._train_model(input_fn, hooks, saving_listeners)
    355       logging.info('Loss for final step: %s.', loss)
    356       return self

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.pyc in _train_model(self, input_fn, hooks, saving_listeners)
   1205       return self._train_model_distributed(input_fn, hooks, saving_listeners)
   1206     else:
-> 1207       return self._train_model_default(input_fn, hooks, saving_listeners)
   1208 
   1209   def _train_model_default(self, input_fn, hooks, saving_listeners):

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.pyc in _train_model_default(self, input_fn, hooks, saving_listeners)
   1239       return self._train_with_estimator_spec(estimator_spec, worker_hooks,
   1240                                              hooks, global_step_tensor,
-> 1241                                              saving_listeners)
   1242 
   1243   def _train_model_distributed(self, input_fn, hooks, saving_listeners):

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.pyc in _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks, global_step_tensor, saving_listeners)
   1469       loss = None
   1470       while not mon_sess.should_stop():
-> 1471         _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
   1472     return loss
   1473 

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.pyc in run(self, fetches, feed_dict, options, run_metadata)
    669                           feed_dict=feed_dict,
    670                           options=options,
--> 671                           run_metadata=run_metadata)
    672 
    673   def run_step_fn(self, step_fn):

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.pyc in run(self, fetches, feed_dict, options, run_metadata)
   1154                               feed_dict=feed_dict,
   1155                               options=options,
-> 1156                               run_metadata=run_metadata)
   1157       except _PREEMPTION_ERRORS as e:
   1158         logging.info('An error was raised. This may be due to a preemption in '

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.pyc in run(self, *args, **kwargs)
   1253         raise six.reraise(*original_exc_info)
   1254       else:
-> 1255         raise six.reraise(*original_exc_info)
   1256 
   1257 

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.pyc in run(self, *args, **kwargs)
   1238   def run(self, *args, **kwargs):
   1239     try:
-> 1240       return self._sess.run(*args, **kwargs)
   1241     except _PREEMPTION_ERRORS:
   1242       raise

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.pyc in run(self, fetches, feed_dict, options, run_metadata)
   1310                                   feed_dict=feed_dict,
   1311                                   options=options,
-> 1312                                   run_metadata=run_metadata)
   1313 
   1314     for hook in self._hooks:

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.pyc in run(self, *args, **kwargs)
   1074 
   1075   def run(self, *args, **kwargs):
-> 1076     return self._sess.run(*args, **kwargs)
   1077 
   1078   def run_step_fn(self, step_fn, raw_session, run_with_hooks):

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in run(self, fetches, feed_dict, options, run_metadata)
    927     try:
    928       result = self._run(None, fetches, feed_dict, options_ptr,
--> 929                          run_metadata_ptr)
    930       if run_metadata:
    931         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1150     if final_fetches or final_targets or (handle and feed_dict_tensor):
   1151       results = self._do_run(handle, final_targets, final_fetches,
-> 1152                              feed_dict_tensor, options, run_metadata)
   1153     else:
   1154       results = []

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1326     if handle is None:
   1327       return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1328                            run_metadata)
   1329     else:
   1330       return self._do_call(_prun_fn, handle, feeds, fetches)

/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in _do_call(self, fn, *args)
   1346           pass
   1347       message = error_interpolation.interpolate(message, self._graph)
-> 1348       raise type(e)(node_def, op, message)
   1349 
   1350   def _extend_graph(self):

InvalidArgumentError: Integer division by zero
	 [[node FloorMod (defined at <ipython-input-7-fc13f81c40ce>:17)  = FloorMod[T=DT_INT32, _class=["loc:@cond/CosineDecay/Cast_1/Switch"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](Cast, Mul)]]

Caused by op u'FloorMod', defined at:
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 162, in _run_module_as_main
    "__main__", fname, loader, pkg_name)
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 72, in _run_code
    exec code in run_globals
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/ipykernel_launcher.py", line 16, in <module>
    app.launch_new_instance()
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/traitlets/config/application.py", line 658, in launch_instance
    app.start()
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/ipykernel/kernelapp.py", line 486, in start
    self.io_loop.start()
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tornado/ioloop.py", line 1008, in start
    self._run_callback(self._callbacks.popleft())
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tornado/ioloop.py", line 759, in _run_callback
    ret = callback()
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tornado/stack_context.py", line 276, in null_wrapper
    return fn(*args, **kwargs)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/zmq/eventloop/zmqstream.py", line 536, in <lambda>
    self.io_loop.add_callback(lambda : self._handle_events(self.socket, 0))
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
    self._handle_recv()
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
    self._run_callback(callback, msg)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
    callback(*args, **kwargs)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tornado/stack_context.py", line 276, in null_wrapper
    return fn(*args, **kwargs)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
    return self.dispatch_shell(stream, msg)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
    handler(stream, idents, msg)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
    user_expressions, allow_stdin)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/ipykernel/ipkernel.py", line 208, in do_execute
    res = shell.run_cell(code, store_history=store_history, silent=silent)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/ipykernel/zmqshell.py", line 537, in run_cell
    return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2714, in run_cell
    interactivity=interactivity, compiler=compiler, result=result)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2824, in run_ast_nodes
    if self.run_code(code, result):
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2878, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-12-dec9477e6e51>", line 1, in <module>
    train_and_evaluate_experiment(params, run_config)
  File "<ipython-input-10-1753eb81587c>", line 53, in train_and_evaluate_experiment
    estimator.train(train_input_fn, steps=params.traning_steps)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 354, in train
    loss = self._train_model(input_fn, hooks, saving_listeners)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1207, in _train_model
    return self._train_model_default(input_fn, hooks, saving_listeners)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1237, in _train_model_default
    features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 1195, in _call_model_fn
    model_fn_results = self._model_fn(features=features, **kwargs)
  File "<ipython-input-7-fc13f81c40ce>", line 62, in model_fn
    learning_rate = find_learning_rate(params) if params.lr_search else update_learning_rate(params)
  File "<ipython-input-7-fc13f81c40ce>", line 17, in update_learning_rate
    cycle_step = tf.mod(training_step, current_cycle_length)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/ops/gen_math_ops.py", line 3142, in floor_mod
    "FloorMod", x=x, y=y, name=name)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
    op_def=op_def)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/util/deprecation.py", line 488, in new_func
    return func(*args, **kwargs)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3274, in create_op
    op_def=op_def)
  File "/Users/khalidsalama/Technology/python-venvs/py27-venv/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1770, in __init__
    self._traceback = tf_stack.extract_stack()

InvalidArgumentError (see above for traceback): Integer division by zero
	 [[node FloorMod (defined at <ipython-input-7-fc13f81c40ce>:17)  = FloorMod[T=DT_INT32, _class=["loc:@cond/CosineDecay/Cast_1/Switch"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](Cast, Mul)]]

In [ ]: